--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ ... @@
#include <xen/spinlock.h>
#include <xen/slab.h>
#include <xen/irq.h>
+#include <xen/softirq.h>
#include <asm/domain_page.h>
@@ ... @@
void free_domheap_pages(struct pfn_info *pg, unsigned int order)
{
int i, drop_dom_ref;
struct domain *d = pg->u.inuse.domain;
- void *p;
ASSERT(!in_irq());
@@ ... @@
        for ( i = 0; i < (1 << order); i++ )
        {
            pg[i].tlbflush_timestamp = tlbflush_current_time();
pg[i].u.free.cpu_mask = 1 << d->processor;
list_del(&pg[i].list);
+ }
+
+ d->tot_pages -= 1 << order;
+ drop_dom_ref = (d->tot_pages == 0);
+
+ spin_unlock_recursive(&d->page_alloc_lock);
+ if ( likely(!test_bit(DF_DYING, &d->flags)) )
+ {
+ free_heap_pages(MEMZONE_DOM, pg, order);
+ }
+ else
+ {
/*
* Normally we expect a domain to clear pages before freeing them,
* if it cares about the secrecy of their contents. However, after
* a domain has died we assume responsibility for erasure.
*/
- if ( unlikely(test_bit(DF_DYING, &d->flags)) )
+ for ( i = 0; i < (1 << order); i++ )
{
- p = map_domain_mem(page_to_phys(&pg[i]));
- clear_page(p);
- unmap_domain_mem(p);
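+                /* Queue the dirty page; the scrub softirq will clear and free it. */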
+ spin_lock(&page_scrub_lock);
+ list_add(&pg[i].list, &page_scrub_list);
+ spin_unlock(&page_scrub_lock);
}
}
-
- d->tot_pages -= 1 << order;
- drop_dom_ref = (d->tot_pages == 0);
-
- spin_unlock_recursive(&d->page_alloc_lock);
-
- free_heap_pages(MEMZONE_DOM, pg, order);
}
else
{
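A point worth spelling out about the hunk above: the page-count accounting and the release of d->page_alloc_lock now happen once, up front, and only then is DF_DYING consulted. A live domain's pages go straight back to the heap, while a dying domain's pages are queued, still dirty, on page_scrub_list for the scrubbing softirq defined below; the synchronous clear_page() of every freed page disappears from this path. The per-page acquire/release of page_scrub_lock presumably keeps each critical section short for the concurrent softirq consumer.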
@@ ... @@
unsigned long avail_domheap_pages(void)
{
return avail[MEMZONE_DOM];
}
+
+
+
+/*************************
+ * PAGE SCRUBBING
+ */
+
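+/* Queue of dead domains' dirty pages awaiting erasure, and its lock. */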
+spinlock_t page_scrub_lock;
+struct list_head page_scrub_list;
+
+static void page_scrub_softirq(void)
+{
+ struct list_head *ent;
+ struct pfn_info *pg;
+ void *p;
+ int i;
+ s_time_t start = NOW();
+
+ /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+ do {
+ spin_lock(&page_scrub_lock);
+
+ if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+ {
+ spin_unlock(&page_scrub_lock);
+ return;
+ }
+
+        /* Peel up to 16 pages from the list ('ent' already names the first). */
+        for ( i = 1; i < 16; i++ )
+        {
+            /* Stop before the list head so 'ent' keeps naming a real page. */
+            if ( ent->next == &page_scrub_list )
+                break;
+            ent = ent->next;
+        }
+
+ /* Remove peeled pages from the list. */
+ ent->next->prev = &page_scrub_list;
+ page_scrub_list.next = ent->next;
+
+ spin_unlock(&page_scrub_lock);
+
+ /* Working backwards, scrub each page in turn. */
+ while ( ent != &page_scrub_list )
+ {
+ pg = list_entry(ent, struct pfn_info, list);
+ ent = ent->prev;
+ p = map_domain_mem(page_to_phys(pg));
+ clear_page(p);
+ unmap_domain_mem(p);
+ free_heap_pages(MEMZONE_DOM, pg, 0);
+ }
+ } while ( (NOW() - start) < MILLISECS(1) );
+}
+
+static __init int page_scrub_init(void)
+{
+ spin_lock_init(&page_scrub_lock);
+ INIT_LIST_HEAD(&page_scrub_list);
+ open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+ return 0;
+}
+__initcall(page_scrub_init);
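The peel-and-detach logic above is easy to get wrong: checking ent->next before advancing is what keeps ent pointing at a real page, so the two-pointer unlink always detaches at least one entry. The sketch below compiles as a standalone program and mirrors that logic with a hypothetical struct node in place of Xen's struct pfn_info, printf() standing in for clear_page()/free_heap_pages(), and the 1ms NOW()/MILLISECS budget dropped; everything in it is illustrative rather than part of the patch.

#include <stdio.h>

/* Hypothetical stand-ins for Xen's list_head and pfn_info (illustration only). */
struct node { struct node *next, *prev; int id; };

static struct node head = { &head, &head, -1 };  /* empty sentinel list */

static void push_tail(struct node *n)
{
    n->prev = head.prev;
    n->next = &head;
    head.prev->next = n;
    head.prev = n;
}

int main(void)
{
    struct node pool[40];
    struct node *ent;
    int i;

    for ( i = 0; i < 40; i++ )
    {
        pool[i].id = i;
        push_tail(&pool[i]);
    }

    /* Consume the list in bounded batches, as page_scrub_softirq does. */
    while ( (ent = head.next) != &head )
    {
        /* Peel up to 16 nodes: 'ent' names the first, so advance at most
           15 times, stopping before the sentinel. */
        for ( i = 1; i < 16; i++ )
        {
            if ( ent->next == &head )
                break;
            ent = ent->next;
        }

        /* Detach [head.next, ent] with two pointer writes; the segment's
           internal prev links still lead back to the sentinel. */
        ent->next->prev = &head;
        head.next = ent->next;

        /* Working backwards, "scrub" each node, as the softirq does. */
        while ( ent != &head )
        {
            printf("scrub page %d\n", ent->id);
            ent = ent->prev;
        }
    }
    return 0;
}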
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ ... @@
#ifndef __XEN_MM_H__
#define __XEN_MM_H__
+#include <xen/config.h>
+#include <xen/list.h>
+#include <xen/spinlock.h>
+
struct domain;
struct pfn_info;
@@ ... @@
#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
#define free_domheap_page(_p) (free_domheap_pages(_p,0))
+/* Automatic page scrubbing for dead domains. */
+extern spinlock_t page_scrub_lock;
+extern struct list_head page_scrub_list;
+#define page_scrub_schedule_work() \
+ do { \
+ if ( !list_empty(&page_scrub_list) ) \
+ raise_softirq(PAGE_SCRUB_SOFTIRQ); \
+ } while ( 0 )
+
#include <asm/mm.h>
#endif /* __XEN_MM_H__ */
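Two caveats that this excerpt leaves implicit. First, page_scrub_schedule_work() expands to raise_softirq(PAGE_SCRUB_SOFTIRQ), so callers also need xen/softirq.h in scope, and the hunk that allocates the new PAGE_SCRUB_SOFTIRQ number in xen/include/xen/softirq.h is not shown here. Second, something must invoke the macro periodically; a natural place is each CPU's idle loop, so that queued pages are laundered in otherwise-idle time. The sketch below is illustrative only: the call site and its ordering are assumptions, not part of this patch.

/* Illustrative only: a plausible idle-loop call site (assumed, not taken
 * from this patch). do_softirq() then runs page_scrub_softirq(), which
 * works for at most ~1ms per invocation. */
void idle_loop(void)
{
    for ( ; ; )
    {
        page_scrub_schedule_work(); /* raise PAGE_SCRUB_SOFTIRQ if pages queued */
        do_softirq();               /* run pending softirq handlers */
        safe_halt();                /* sleep until the next interrupt */
    }
}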